@InProceedings{FerreiraMartNasc:2021:SyReHu,
author = "Ferreira, Jo{\~a}o Pedro Moreira and Martins, Renato and
Nascimento, Erickson Rangel",
affiliation = "{Universidade Federal de Minas Gerais} and {Universit{\'e}
Bourgogne Franche-Comt{\'e}} and {Universidade Federal de Minas
Gerais}",
title = "Synthesizing realistic human dance motions conditioned by musical
data using graph convolutional networks",
booktitle = "Proceedings...",
year = "2021",
editor = "Paiva, Afonso and Menotti, David and Baranoski, Gladimir V. G. and
Proen{\c{c}}a, Hugo Pedro and Junior, Antonio Lopes Apolinario
and Papa, Jo{\~a}o Paulo and Pagliosa, Paulo and dos Santos,
Thiago Oliveira and e S{\'a}, Asla Medeiros and da Silveira,
Thiago Lopes Trugillo and Brazil, Emilio Vital and Ponti, Moacir
A. and Fernandes, Leandro A. F. and Avila, Sandra",
organization = "Conference on Graphics, Patterns and Images, 34. (SIBGRAPI)",
publisher = "Sociedade Brasileira de Computa{\c{c}}{\~a}o",
address = "Porto Alegre",
keywords = "Human motion generation, sound and dance processing, multimodal
learning, conditional adversarial nets, graph convolutional neural
networks.",
abstract = "Learning to move naturally from music, i.e., to dance, is one of
the most complex motions humans often perform effortlessly.
Synthesizing human motion through learning techniques is becoming
an increasingly popular approach to alleviating the requirement of
new data capture to produce animations. Most approaches that address
automatic dance motion synthesis with classical convolutional and
recursive neural models suffer from training and variability issues due
to the non-Euclidean geometry of the motion manifold. In this thesis, we
design a novel method based on graph convolutional networks that
overcomes the aforementioned issues and tackles the problem of automatic
dance generation from audio information. Our method uses an adversarial
learning scheme conditioned on the input music audio to create natural
motions that preserve the key movements of different music styles. We
also collected, annotated, and made
publicly available a novel multimodal dataset with paired audio,
motion data and videos of people dancing three different music
styles, as a common ground to evaluate dance generation
approaches. The results suggest that the proposed GCN model
outperforms the state-of-the-art dance generation method
conditioned on music in different experiments. Moreover, our
graph-convolutional approach is simpler, easier to train, and capable of
generating more realistic motion styles according to both qualitative and
quantitative metrics. It also presents a visual movement perceptual
quality comparable to that of real motion data.
The dataset, source code, and qualitative results are available on
the project's webpage:
https://verlab.github.io/Learning2Dance_CAG_2020/.",
conference-location = "Gramado, RS, Brazil (virtual)",
conference-year = "18-22 Oct. 2021",
language = "en",
ibi = "8JMKD3MGPEW34M/45DAPHE",
url = "http://urlib.net/ibi/8JMKD3MGPEW34M/45DAPHE",
targetfile = "wtd-sibgrapi-joao.pdf",
urlaccessdate = "2024, May 06"
}